/* These functions update shadow entries (and do bookkeeping on the shadow
* tables they are in). It is intended that they are the only
* functions which ever write (non-zero) data onto a shadow page.
- *
- * They return a set of flags:
- * SHADOW_SET_CHANGED -- we actually wrote a new value to the shadow.
- * SHADOW_SET_FLUSH -- the caller must cause a TLB flush.
- * SHADOW_SET_ERROR -- the input is not a valid entry (for example, if
- * shadow_get_page_from_l1e() fails).
*/
static inline void safe_write_entry(void *dst, void *src)
| (((unsigned long)sl4e) & ~PAGE_MASK));
if ( shadow_l4e_get_flags(new_sl4e) & _PAGE_PRESENT )
- {
/* About to install a new reference */
- sh_get_ref(shadow_l4e_get_mfn(new_sl4e), paddr);
- }
+ if ( !sh_get_ref(shadow_l4e_get_mfn(new_sl4e), paddr) )
+ {
+ domain_crash(v->domain);
+ return SHADOW_SET_ERROR;
+ }
/* Write the new entry */
shadow_write_entries(sl4e, &new_sl4e, 1, sl4mfn);
paddr = ((((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT)
| (((unsigned long)sl3e) & ~PAGE_MASK));
- if ( shadow_l3e_get_flags(new_sl3e) & _PAGE_PRESENT )
- {
+ if ( shadow_l3e_get_flags(new_sl3e) & _PAGE_PRESENT )
/* About to install a new reference */
- sh_get_ref(shadow_l3e_get_mfn(new_sl3e), paddr);
- }
+ if ( !sh_get_ref(shadow_l3e_get_mfn(new_sl3e), paddr) )
+ {
+ domain_crash(v->domain);
+ return SHADOW_SET_ERROR;
+ }
/* Write the new entry */
shadow_write_entries(sl3e, &new_sl3e, 1, sl3mfn);
| (((unsigned long)sl2e) & ~PAGE_MASK));
if ( shadow_l2e_get_flags(new_sl2e) & _PAGE_PRESENT )
- {
/* About to install a new reference */
- sh_get_ref(shadow_l2e_get_mfn(new_sl2e), paddr);
- }
+ if ( !sh_get_ref(shadow_l2e_get_mfn(new_sl2e), paddr) )
+ {
+ domain_crash(v->domain);
+ return SHADOW_SET_ERROR;
+ }
/* Write the new entry */
#if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2
*sl3mfn, &new_sl4e, ft);
r = shadow_set_l4e(v, sl4e, new_sl4e, sl4mfn);
ASSERT((r & SHADOW_SET_FLUSH) == 0);
+ if ( r & SHADOW_SET_ERROR )
+ return NULL;
}
/* Now follow it down a level. Guaranteed to succeed. */
return sh_linear_l3_table(v) + shadow_l3_linear_offset(gw->va);
if ( !valid_mfn(gw->l2mfn) ) return NULL; /* No guest page. */
/* Get the l3e */
sl3e = shadow_get_and_create_l3e(v, gw, &sl3mfn, ft);
- ASSERT(sl3e != NULL); /* Since we know guest PT is valid this far */
+ if ( sl3e == NULL ) return NULL;
if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT )
{
*sl2mfn = shadow_l3e_get_mfn(*sl3e);
*sl2mfn, &new_sl3e, ft);
r = shadow_set_l3e(v, sl3e, new_sl3e, sl3mfn);
ASSERT((r & SHADOW_SET_FLUSH) == 0);
+ if ( r & SHADOW_SET_ERROR )
+ return NULL;
}
/* Now follow it down a level. Guaranteed to succeed. */
return sh_linear_l2_table(v) + shadow_l2_linear_offset(gw->va);
*sl1mfn, &new_sl2e, ft);
r = shadow_set_l2e(v, sl2e, new_sl2e, sl2mfn);
ASSERT((r & SHADOW_SET_FLUSH) == 0);
+ if ( r & SHADOW_SET_ERROR )
+ return NULL;
/* This next line is important: in 32-on-PAE and 32-on-64 modes,
* the guest l1 table has an 8k shadow, and we need to return
* the right mfn of the pair. This call will set it for us as a
/* Acquire the shadow. This must happen before we figure out the rights
* for the shadow entry, since we might promote a page here. */
- // XXX -- this code will need to change somewhat if/when the shadow code
- // can directly map superpages...
ptr_sl1e = shadow_get_and_create_l1e(v, &gw, &sl1mfn, ft);
- ASSERT(ptr_sl1e);
+ if ( unlikely(ptr_sl1e == NULL) )
+ {
+ /* Couldn't get the sl1e! Since we know the guest entries
+ * are OK, this can only have been caused by a failed
+ * shadow_set_l*e(), which will have crashed the guest.
+ * Get out of the fault handler immediately. */
+ ASSERT(test_bit(_DOMF_dying, &d->domain_flags));
+ shadow_unlock(d);
+ return 0;
+ }
/* Calculate the shadow entry and write it */
l1e_propagate_from_guest(v, (gw.l1e) ? gw.l1e : &gw.eff_l1e, gw.l1mfn,
smfn = get_shadow_status(v, gmfn, root_type);
if ( valid_mfn(smfn) )
{
- /* Pull this root shadow to the front of the list of roots. */
- struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
- list_del(&sp->list);
- list_add(&sp->list, &d->arch.shadow.toplevel_shadows);
+ /* Pull this root shadow out of the list of roots (we will put
+ * it back in at the head). */
+ list_del(&mfn_to_shadow_page(smfn)->list);
}
else
{
shadow_prealloc(d, SHADOW_MAX_ORDER);
/* Shadow the page. */
smfn = sh_make_shadow(v, gmfn, root_type);
- list_add(&mfn_to_shadow_page(smfn)->list,
- &d->arch.shadow.toplevel_shadows);
}
ASSERT(valid_mfn(smfn));
mfn_to_page(gmfn)->shadow_flags &= ~SHF_unhooked_mappings;
#endif
- /* Take a ref to this page: it will be released in sh_detach_old_tables()
- * or in the next call to sh_set_toplevel_shadow(). */
- sh_get_ref(smfn, 0);
- sh_pin(smfn);
+ /* Pin the shadow and put it (back) on the list of top-level shadows */
+ if ( sh_pin(smfn) )
+ list_add(&mfn_to_shadow_page(smfn)->list,
+ &d->arch.shadow.toplevel_shadows);
+ else
+ {
+ SHADOW_ERROR("can't pin %#lx as toplevel shadow\n", mfn_x(smfn));
+ domain_crash(v->domain);
+ }
+
+ /* Take a ref to this page: it will be released in sh_detach_old_tables. */
+ if ( !sh_get_ref(smfn, 0) )
+ {
+ SHADOW_ERROR("can't install %#lx as toplevel shadow\n", mfn_x(smfn));
+ domain_crash(v->domain);
+ }
/* Done. Install it */
SHADOW_PRINTK("%u/%u [%u] gmfn %#"SH_PRI_mfn" smfn %#"SH_PRI_mfn"\n",
#endif
#endif
mfn_t sl1mfn;
-
+ int r;
/* Carefully look in the shadow linear map for the l1e we expect */
#if SHADOW_PAGING_LEVELS >= 4
/* Found it! Need to remove its write permissions. */
sl1mfn = shadow_l2e_get_mfn(*sl2p);
sl1e = shadow_l1e_remove_flags(sl1e, _PAGE_RW);
- shadow_set_l1e(v, sl1p, sl1e, sl1mfn);
+ r = shadow_set_l1e(v, sl1p, sl1e, sl1mfn);
+ ASSERT( !(r & SHADOW_SET_ERROR) );
return 1;
}
#endif
&& (flags & _PAGE_RW)
&& (mfn_x(shadow_l1e_get_mfn(*sl1e)) == mfn_x(readonly_mfn)) )
{
- shadow_set_l1e(v, sl1e, shadow_l1e_empty(), sl1mfn);
+ (void) shadow_set_l1e(v, sl1e, shadow_l1e_empty(), sl1mfn);
#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
/* Remember the last shadow that we shot a writeable mapping in */
v->arch.shadow.last_writeable_pte_smfn = mfn_x(base_sl1mfn);
if ( (flags & _PAGE_PRESENT)
&& (mfn_x(shadow_l1e_get_mfn(*sl1e)) == mfn_x(target_mfn)) )
{
- shadow_set_l1e(v, sl1e, shadow_l1e_empty(), sl1mfn);
+ (void) shadow_set_l1e(v, sl1e, shadow_l1e_empty(), sl1mfn);
if ( (mfn_to_page(target_mfn)->count_info & PGC_count_mask) == 0 )
/* This breaks us cleanly out of the FOREACH macro */
done = 1;
switch ( mfn_to_shadow_page(smfn)->type )
{
case SH_type_l1_shadow:
- shadow_set_l1e(v, ep, shadow_l1e_empty(), smfn); break;
+ (void) shadow_set_l1e(v, ep, shadow_l1e_empty(), smfn); break;
case SH_type_l2_shadow:
#if GUEST_PAGING_LEVELS == 3
case SH_type_l2h_shadow:
#endif
- shadow_set_l2e(v, ep, shadow_l2e_empty(), smfn); break;
+ (void) shadow_set_l2e(v, ep, shadow_l2e_empty(), smfn); break;
#if GUEST_PAGING_LEVELS >= 4
case SH_type_l3_shadow:
- shadow_set_l3e(v, ep, shadow_l3e_empty(), smfn); break;
+ (void) shadow_set_l3e(v, ep, shadow_l3e_empty(), smfn); break;
case SH_type_l4_shadow:
- shadow_set_l4e(v, ep, shadow_l4e_empty(), smfn); break;
+ (void) shadow_set_l4e(v, ep, shadow_l4e_empty(), smfn); break;
#endif
default: BUG(); /* Called with the wrong kind of shadow. */
}
if ( (flags & _PAGE_PRESENT)
&& (mfn_x(shadow_l2e_get_mfn(*sl2e)) == mfn_x(sl1mfn)) )
{
- shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
+ (void) shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
if ( mfn_to_shadow_page(sl1mfn)->type == 0 )
/* This breaks us cleanly out of the FOREACH macro */
done = 1;
if ( (flags & _PAGE_PRESENT)
&& (mfn_x(shadow_l3e_get_mfn(*sl3e)) == mfn_x(sl2mfn)) )
{
- shadow_set_l3e(v, sl3e, shadow_l3e_empty(), sl3mfn);
+ (void) shadow_set_l3e(v, sl3e, shadow_l3e_empty(), sl3mfn);
if ( mfn_to_shadow_page(sl2mfn)->type == 0 )
/* This breaks us cleanly out of the FOREACH macro */
done = 1;
if ( (flags & _PAGE_PRESENT)
&& (mfn_x(shadow_l4e_get_mfn(*sl4e)) == mfn_x(sl3mfn)) )
{
- shadow_set_l4e(v, sl4e, shadow_l4e_empty(), sl4mfn);
+ (void) shadow_set_l4e(v, sl4e, shadow_l4e_empty(), sl4mfn);
if ( mfn_to_shadow_page(sl3mfn)->type == 0 )
/* This breaks us cleanly out of the FOREACH macro */
done = 1;
void sh_install_xen_entries_in_l2(struct vcpu *v, mfn_t gl2mfn, mfn_t sl2mfn);
+/******************************************************************************
+ * Flags used in the return value of the shadow_set_lXe() functions.
+ */
+
+/* We actually wrote something new to the shadow */
+#define SHADOW_SET_CHANGED 0x1
+/* Caller should flush TLBs to clear the old entry */
+#define SHADOW_SET_FLUSH 0x2
+/* Something went wrong: the shadow entry was invalid or refcount failed */
+#define SHADOW_SET_ERROR 0x4
+
+
/******************************************************************************
* MFN/page-info handling
*/
/* Increase the refcount of a shadow page. Arguments are the mfn to refcount,
* and the physical address of the shadow entry that holds the ref (or zero
- * if the ref is held by something else) */
-static inline void sh_get_ref(mfn_t smfn, paddr_t entry_pa)
+ * if the ref is held by something else).
+ * Returns 0 for failure, 1 for success. */
+static inline int sh_get_ref(mfn_t smfn, paddr_t entry_pa)
{
u32 x, nx;
struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
{
SHADOW_PRINTK("shadow ref overflow, gmfn=%" PRtype_info " smfn=%lx\n",
sp->backpointer, mfn_x(smfn));
- domain_crash_synchronous();
+ return 0;
}
/* Guarded by the shadow lock, so no need for atomic update */
/* We remember the first shadow entry that points to each shadow. */
if ( entry_pa != 0 && sp->up == 0 )
sp->up = entry_pa;
+
+ return 1;
}
if ( unlikely(x == 0) )
{
- SHADOW_PRINTK("shadow ref underflow, smfn=%lx oc=%08x t=%#x\n",
- mfn_x(smfn), sp->count, sp->type);
- domain_crash_synchronous();
+ SHADOW_ERROR("shadow ref underflow, smfn=%lx oc=%08x t=%#x\n",
+ mfn_x(smfn), sp->count, sp->type);
+ BUG();
}
/* Guarded by the shadow lock, so no need for atomic update */
}
-/* Pin a shadow page: take an extra refcount and set the pin bit. */
-static inline void sh_pin(mfn_t smfn)
+/* Pin a shadow page: take an extra refcount and set the pin bit.
+ * Returns 0 for failure, 1 for success. */
+static inline int sh_pin(mfn_t smfn)
{
struct shadow_page_info *sp;
sp = mfn_to_shadow_page(smfn);
if ( !(sp->pinned) )
{
- sh_get_ref(smfn, 0);
+ /* The pin bit is backed by its own reference (entry_pa == 0, since
+ * no shadow entry holds it). If that ref cannot be taken, report
+ * failure to the caller instead of pinning an under-counted page. */
+ if ( !sh_get_ref(smfn, 0) )
+ return 0;
sp->pinned = 1;
}
+ /* Already pinned (or just pinned successfully): nothing more to do. */
+ return 1;
}
/* Unpin a shadow page: unset the pin bit and release the extra ref. */